# Step 1: make a list of images to read in
import os
import glob
# images are divided up into vehicles and non-vehicle folders (each of which contains subfolders)
# first locate vehicle images
basedir = 'vehicles/'
# Different subfolders correspond to different image sources, e.g. GTI, KITTI, or images generated by me
# List all the subfolders inside the vehicles folder
image_types = os.listdir(basedir)
cars = []
for imtype in image_types:
cars.extend(glob.glob(basedir+imtype+'/*'))
print('Number of Vehicle Images found:',len(cars))
# do same thing for non-vehicle images
basedir2 = 'non-vehicles/'
# Different subfolders correspond to different image sources, e.g. GTI, KITTI, or images generated by me
image_types = os.listdir(basedir2)
notcars = []
for imtype in image_types:
notcars.extend(glob.glob(basedir2+imtype+'/*'))
print('Number of Non-Vehicle Images found:',len(notcars))
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import time
import pickle
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn
from skimage.feature import hog
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True):
# Call with two outputs if vis==True
if vis == True:
features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,
visualize=vis, feature_vector=feature_vec)  # 'visualise' was renamed to 'visualize' in newer scikit-image
return features, hog_image
# Otherwise call with one output
else:
features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,
visualize=vis, feature_vector=feature_vec)
return features
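# Illustrative check (not part of the pipeline): for a 64x64 patch with
# pix_per_cell=8 and cell_per_block=2 there are 64/8 - 2 + 1 = 7 blocks per
# dimension, so the flattened HOG vector has 7*7*2*2*orient values
# (e.g. 1764 for orient=9, 1568 for orient=8)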
# Define a function to compute binned color features
# Downsample the image and use the raw pixel values
# of all 3 color channels as features
def bin_spatial(img, size=(32, 32)):
# Use cv2.resize().ravel() to create the feature vector
color1 = cv2.resize(img[:,:,0],size).ravel()
color2 = cv2.resize(img[:,:,1],size).ravel()
color3 = cv2.resize(img[:,:,2],size).ravel()
# Return the feature vector
return np.hstack((color1,color2,color3))
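# e.g. with size=(32, 32) this produces a 32*32*3 = 3072-element feature vector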
# Define a function to compute color histogram features
# bins_range would need to change if reading .png files with mpimg (scaled 0-1)
# Since we read with mpimg, drop the bins_range argument altogether and let np.histogram choose it automatically
def color_hist(img, nbins=32):
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(img[:,:,0], bins=nbins)
channel2_hist = np.histogram(img[:,:,1], bins=nbins)
channel3_hist = np.histogram(img[:,:,2], bins=nbins)
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
# Return the concatenated feature vector
return hist_features
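# e.g. with nbins=32 this produces 32 bins * 3 channels = 96 values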
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(imgs, color_space='RGB', spatial_size=(32, 32),
hist_bins=32, orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0,
spatial_feat=True, hist_feat=True, hog_feat=True):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in imgs:
file_features = []
# Read in each one by one
image = mpimg.imread(file)
# apply color conversion if other than 'RGB'
if color_space != 'RGB':
if color_space == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif color_space == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif color_space == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(image)
if spatial_feat == True:
spatial_features = bin_spatial(feature_image, size=spatial_size)
file_features.append(spatial_features)
if hist_feat == True:
# Apply color_hist()
hist_features = color_hist(feature_image, nbins=hist_bins)
file_features.append(hist_features)
if hog_feat == True:
# Call get_hog_features() with vis=False, feature_vec=True
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.append(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
hog_features = np.ravel(hog_features)
else:
hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
# Append the new feature vector to the features list
file_features.append(hog_features)
# Keep the same feature order for car and not-car images, otherwise the classifier won't work
features.append(np.concatenate(file_features))
# Return list of feature vectors
return features
# Define a function that takes an image,
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
# If x and/or y start/stop positions not defined, set to image size
if x_start_stop[0] is None:
x_start_stop[0] = 0
if x_start_stop[1] is None:
x_start_stop[1] = img.shape[1]
if y_start_stop[0] is None:
y_start_stop[0] = 0
if y_start_stop[1] is None:
y_start_stop[1] = img.shape[0]
# Compute the span of the region to be searched
xspan = x_start_stop[1] - x_start_stop[0]
yspan = y_start_stop[1] - y_start_stop[0]
# Compute the number of pixels per step in x/y
# np.int was removed in newer NumPy; the builtin int is equivalent here
nx_pix_per_step = int(xy_window[0]*(1 - xy_overlap[0]))
ny_pix_per_step = int(xy_window[1]*(1 - xy_overlap[1]))
# Compute the number of windows in x/y
nx_windows = int(xspan/nx_pix_per_step) - 1
ny_windows = int(yspan/ny_pix_per_step) - 1
# Initialize a list to append window positions to
window_list = []
# Loop through finding x and y window positions
# Note: you could vectorize this step, but in practice
# you'll be considering windows one by one with your
# classifier, so looping makes sense
for ys in range(ny_windows):
for xs in range(nx_windows):
# Calculate window position
startx = xs*nx_pix_per_step + x_start_stop[0]
endx = startx + xy_window[0]
starty = ys*ny_pix_per_step + y_start_stop[0]
endy = starty + xy_window[1]
# Append window position to list
window_list.append(((startx, starty), (endx, endy)))
# Return the list of windows
return window_list
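# Rough worked example: with y_start_stop=[400, 656] (span 256 px), a 96x96 window
# and 0.5 overlap, the step is 48 px, giving int(256/48)-1 = 4 rows and, on a
# 1280-px-wide frame, int(1280/48)-1 = 25 columns, i.e. about 100 windows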
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# Define a function to extract features from a single image window
# This function is very similar to extract_features()
# just for a single image rather than list of images
# extract_features() is applied to the large training set; this function is applied to single search windows at test time
def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
hist_bins=32, orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0,
spatial_feat=True, hist_feat=True, hog_feat=True, vis = False):
#1) Define an empty list to receive features
img_features = []
#2) Apply color conversion if other than 'RGB'
if color_space != 'RGB':
if color_space == 'HSV':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
elif color_space == 'LUV':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
elif color_space == 'YUV':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(img)
#3) Compute spatial features if flag is set
if spatial_feat == True:
spatial_features = bin_spatial(feature_image, size=spatial_size)
#4) Append features to list
img_features.append(spatial_features)
#5) Compute histogram features if flag is set
if hist_feat == True:
hist_features = color_hist(feature_image, nbins=hist_bins)
#6) Append features to list
img_features.append(hist_features)
#7) Compute HOG features if flag is set
if hog_feat == True:
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.append(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
hog_features = np.concatenate(hog_features)
else:
if vis == True:
hog_features,hog_image = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=True, feature_vec=True)
else:
hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
#8) Append features to list
img_features.append(hog_features)
#9) Return concatenated array of features
if vis ==True:
return np.concatenate(img_features), hog_image
else:
return np.concatenate(img_features)
# Define a function you will pass an image
# and the list of windows to be searched (output of slide_window())
# We use a linear SVM as the classifier
# and a fitted StandardScaler to normalize the feature vectors
def search_windows(img, windows, clf, scaler, color_space='RGB',
spatial_size=(32, 32), hist_bins=32,
hist_range=(0, 256), orient=9,
pix_per_cell=8, cell_per_block=2,
hog_channel=0, spatial_feat=True,
hist_feat=True, hog_feat=True):
#1) Create an empty list to receive positive detection windows
on_windows = []
#2) Iterate over all windows in the list
for window in windows:
#3) Extract the test window from original image
test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
#4) Extract features for that window using single_img_features()
features = single_img_features(test_img, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
#5) Scale extracted features to be fed to classifier
test_features = scaler.transform(np.array(features).reshape(1, -1))
#6) Predict using your classifier
prediction = clf.predict(test_features)
#7) If positive (prediction == 1) then save the window
if prediction == 1:
on_windows.append(window)
#8) Return windows for positive detections
return on_windows
# Define a function to plot multiple images
def visualize(fig,rows,cols,imgs,titles):
for i,img in enumerate(imgs):
plt.subplot(rows,cols,i+1)
img_dims = len(img.shape)
if img_dims <3:
plt.imshow(img,cmap = 'hot')
plt.title(titles[i])
else:
plt.imshow(img)
plt.title(titles[i])
%matplotlib inline
# choose random car/not-car indices
car_ind = np.random.randint(0,len(cars))
notcar_ind = np.random.randint(0,len(notcars))
# Read in car/not-car images
car_image = mpimg.imread(cars[car_ind])
notcar_image = mpimg.imread(notcars[notcar_ind])
### Define feature parameters
color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 8
pix_per_cell = 8
cell_per_block = 2
hog_channel = 0 # Can be 0, 1, 2, or "ALL"
spatial_size = (16,16) # spatial binning dimensions
hist_bins = 48 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
car_features, car_hog_image = single_img_features(car_image, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat, vis = True)
notcar_features, notcar_hog_image = single_img_features(notcar_image, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat, vis = True)
images = [car_image,car_hog_image,notcar_image,notcar_hog_image]
titles = ['car_image','car_hog_image','notcar_image','notcar_hog_image']
fig = plt.figure(figsize=(12,3))
visualize(fig,1,4,images,titles)
# Training classifier
### Define feature parameters
color_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 8
pix_per_cell = 8
cell_per_block = 2
hog_channel = 'ALL' # Can be 0, 1, 2, or "ALL"
spatial_size = (16,16) # spatial binning dimensions
hist_bins = 48 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
t = time.time()
#n_samples = 1000
#random_idxs = np.random.randint(0,len(cars),n_samples)
test_cars = cars #np.array(cars)[random_idxs]
test_notcars = notcars #np.array(notcars)[random_idxs]
car_features = extract_features(test_cars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
notcar_features = extract_features(test_notcars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
print(time.time()-t, 'Seconds to compute features...')
# Create an array stack of feature vectors
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X)
# Apply the scaler to X
scaled_X = X_scaler.transform(X)
# Define the labels vector
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
scaled_X, y, test_size=0.1, random_state=rand_state)
print('Using:',orient,'orientations,',pix_per_cell,
'pixels per cell,',cell_per_block,'cell_per_block,',
hist_bins,'histogram bins, and' , spatial_size,'spatial sampling')
print('Feature vector length:', len(X_train[0]))
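# Sanity check: with these parameters the length should be 16*16*3 (spatial)
# + 48*3 (hist) + 3*7*7*2*2*8 (HOG over all channels) = 768 + 144 + 4704 = 5616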
# Use a linear SVC
svc = LinearSVC()
# Check the training time for the SVC
t=time.time()
svc.fit(X_train, y_train)
print(round(time.time()-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
# Save svc and parameters to a pickle file
try:
f = open('svc_final.p', 'wb')
save = {
'svc': svc,
'X_scaler': X_scaler,
'orient': orient,
'pix_per_cell': pix_per_cell,
'cell_per_block': cell_per_block,
'spatial_size': spatial_size,
'hist_bins': hist_bins
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Error saving data to', 'svc_final.p', ':', e)
raise
statinfo = os.stat('svc_final.p')
print('Pickle file size:', statinfo.st_size)
# Now test the classifier on example images from the test_images folder
searchpath = 'test_images/*'
example_images = glob.glob(searchpath)
images = []
titles = []
y_start_stop = [400,656] # Min and max in y to search in slide_window()
overlap = 0.5
for img_src in example_images:
t1 = time.time()
img = mpimg.imread(img_src)
draw_img = np.copy(img)
# Test images are .jpg (0-255) but the classifier was trained on .png read by mpimg (0-1), so rescale
img = img.astype(np.float32)/255
print(np.min(img),np.max(img))
# 64x64 windows are another option; 96x96 windows are used here
windows = slide_window(img, x_start_stop=[None, None], y_start_stop=y_start_stop,
xy_window=(96, 96), xy_overlap=(overlap, overlap))
hot_windows = search_windows(img, windows, svc, X_scaler, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
window_img = draw_boxes(draw_img, hot_windows, color=(0, 0, 255), thick=6)
images.append(window_img)
titles.append('')
print(time.time()-t1,'seconds to process one image searching', len(windows),'windows')
fig = plt.figure(figsize=(12,18), dpi = 300)
visualize(fig,5,2,images,titles)
def convert_color(img, conv = 'RGB2YCrCb'):
if conv == 'RGB2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
if conv == 'BGR2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
if conv == 'RGB2LUV':
return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
def add_heat(heatmap, bbox_list):
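# Add +1 "heat" to every pixel inside each detection box;
# overlapping detections accumulate, so repeated hits stand out from one-off false positives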
for box in bbox_list:
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
return heatmap
def implement_heatmap(img, box_list):
heat = np.zeros_like(img[:,:,0]).astype(float)
heat = add_heat(heat, box_list)
heat = apply_threshold(heat, 2)
heatmap = np.clip(heat, 0, 255)
labels = label(heatmap)
_, car_bbox = draw_labeled_bboxes(np.copy(img), labels)
return car_bbox
def find_cars(img, ystart, ystop, scale):
draw_img = np.copy(img)
# make a heatmap of zeros
#heatmap = np.zeros_like(img[:,:,0])
img = img.astype(np.float32)/255
# cropped version of the image
img_tosearch = img[ystart:ystop,:,:]
ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')
if scale != 1:
imshape = ctrans_tosearch.shape
# To search with a different window size, resize the whole image rather than the window
ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1]/scale), int(imshape[0]/scale)))
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell)-1
nyblocks = (ch1.shape[0] // pix_per_cell)-1
nfeat_per_block = orient*cell_per_block**2
# 64 was the original sampling rate, with 8 cells and 8 pix per cell
# i.e. the size of the training images the feature vectors were built from
window = 64
# // below is integer (floor) division
nblocks_per_window = (window // pix_per_cell)-1
# so we have 75% overlap between windows
cells_per_step = 2 # Instead of overlap, define how many cells to step
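# Each 64-px window spans 64/8 = 8 cells, so a 2-cell step moves it 16 px, i.e. 75% overlap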
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
nysteps = (nyblocks - nblocks_per_window) // cells_per_step
# Compute individual channel HOG features for the entire image
# This gives a multidimensional array we can sub-sample per window
hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
bbox = []
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
# Extract the image patch
subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))
# Get color features
spatial_features = bin_spatial(subimg, size=spatial_size)
hist_features = color_hist(subimg, nbins=hist_bins)
# Scale features and make a prediction
test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
#test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1))
test_prediction = svc.predict(test_features)
if test_prediction == 1:
xbox_left = int(xleft*scale)
ytop_draw = int(ytop*scale)
win_draw = int(window*scale)
cv2.rectangle(draw_img,(xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart),(0,0,255),6)
bbox.append(((xbox_left, ytop_draw+ystart),(xbox_left + win_draw,ytop_draw+win_draw+ystart)))
return draw_img, bbox
from scipy.ndimage import label  # scipy.ndimage.measurements is deprecated
# Zero out heatmap pixels at or below the threshold and return the result
def apply_threshold(heatmap,threshold):
# zero out pixels below the threshold
heatmap[heatmap <= threshold] = 0
# return thresholded map
return heatmap
def draw_labeled_bboxes(img, labels):
cars_bbox = []
# Iterate through all detected cars
for car_number in range(1, labels[1]+1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
bbox_w, bbox_h = abs(bbox[1][0] - bbox[0][0]), abs(bbox[1][1] - bbox[0][1])
if bbox[0][1] <= 300:
continue
if bbox[0][1] >= 600:
continue
if bbox[0][0] >= 1220:
continue
bbox_area = bbox_w * bbox_h
if bbox_area <= 2500:
continue
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
cars_bbox.append(bbox)
# Return the image
return img,cars_bbox
ystart = 400
ystop = 656
scale = 1.5
for img_name in example_images:
img = mpimg.imread(img_name)
out_img, box_list = find_cars(img, ystart, ystop, scale)
heat = np.zeros_like(img[:,:,0]).astype(float)
# Add heat to each box in box list
heat = add_heat(heat,box_list)
# Apply threshold to help remove false positives
heat = apply_threshold(heat,1)
# Visualize the heatmap when displaying
heatmap = np.clip(heat, 0, 255)
# Find final boxes from heatmap using label function
labels = label(heatmap)
draw_img, _ = draw_labeled_bboxes(np.copy(img), labels)
fig = plt.figure(figsize=(18,8))
plt.subplot(131), plt.imshow(draw_img), plt.title('Car Positions')
plt.subplot(132), plt.imshow(heatmap, cmap='hot'), plt.title('Heat Map')
from collections import deque
# Define a class to store the characteristics of vehicle detections across frames
class Detection():
def __init__(self):
self.current_bboxes = []
self.past_bboxes = deque([], 12)
self.img = None
self.draw_img = None
self.frame = 0
self.new_ystop = 0
def draw_video(self, box_list):
self.past_bboxes.append(box_list)
heat = np.zeros_like(self.img[:,:,0]).astype(float)
for bbox_list in self.past_bboxes:
heat = add_heat(heat, bbox_list)
heat = apply_threshold(heat, len(self.past_bboxes)/3)
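# e.g. once the 12-frame history is full the threshold is 4, so a pixel roughly
# needs detections in at least a third of recent frames (or several overlapping
# boxes) to survive the thresholding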
heatmap = np.clip(heat, 0, 255)
labels = label(heatmap)
self.draw_img, car_box = draw_labeled_bboxes(np.copy(self.img), labels)
if np.array(car_box).any():
self.current_bboxes = car_box
self.new_ystop = np.amax(np.array(car_box), axis=0)[1,1] + 64
else:
self.new_ystop = 0
return self.draw_img
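# pipeline() below runs a full multi-scale search every 8th frame, a restricted
# search (with ystop capped near the last detection) on the other even frames,
# and simply re-draws the last confirmed boxes on the remaining frames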
def pipeline(img, cars, ystarts, ystops, scales):
box_list = []
cars.img = img
if (cars.frame % 8 == 0):
for ystart, ystop, scale in zip(ystarts, ystops, scales):
_, boxes = find_cars(img, ystart, ystop, scale)
box_list.extend(boxes)
car_bbox = implement_heatmap(img, box_list)
draw_img = cars.draw_video(car_bbox)
elif (cars.frame % 2 == 0) and (cars.new_ystop > 0):
new_ystops = [cars.new_ystop if x>cars.new_ystop else x for x in ystops]
for ystart, ystop, scale in zip(ystarts, new_ystops, scales):
_, boxes = find_cars(img, ystart, ystop, scale)
box_list.extend(boxes)
car_bbox = implement_heatmap(img, box_list)
draw_img = cars.draw_video(car_bbox)
else:
draw_img = draw_boxes(img, cars.current_bboxes)
cars.frame += 1
return draw_img
ystarts = [400, 400, 400, 400]
ystops = [496, 528, 592, 656]
scales = [1., 1.25, 1.5, 1.75]
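# The base search window is 64 px, so these scales correspond to roughly
# 64, 80, 96 and 112 px windows, each limited to its matching ystart/ystop band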
def process_image(img):
return pipeline(img, cars, ystarts, ystops, scales)
from moviepy.editor import VideoFileClip
from IPython.display import HTML
cars = Detection()
# Run the pipeline on the project video and save the annotated result
test_output = 'project_video_result.mp4'
clip1 = VideoFileClip('project_video.mp4')
test_clip = clip1.fl_image(process_image)
test_clip.write_videofile(test_output, audio = False)
HTML("""
<video width="960" height = "540" controls>
<source src="{0}">
</video>
""".format(test_output))